diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST 
"https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md 
index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md 
index cb1f84d378b..84ffce8bbf8 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md 
index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md 
index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md 
index 72029ee5249..0a670e1d646 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Sorties structurées excerpt: Découvrez comment utiliser les sorties structurées avec OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 
'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md index 72029ee5249..0a670e1d646 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Sorties structurées excerpt: Découvrez comment utiliser les sorties structurées avec OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md 
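For anyone reviewing the `json_schema` changes above, here is a minimal, self-contained Python sketch of the pattern this patch standardises on. The base URL, the `Meta-Llama-3_3-70B-Instruct` model value, and the `AI_ENDPOINT_API_KEY` variable all come from the hunks above; the schema itself is illustrative rather than the guide's verbatim sample, and the snippet assumes an openai SDK version recent enough to accept a `json_schema` response format.

```python
# Sketch only: structured output against the shared endpoint introduced
# by this patch. The schema below is illustrative, not the repo's exact one.
import json
import os

import openai

client = openai.OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["AI_ENDPOINT_API_KEY"],
)

# Illustrative schema for the "top 3 languages" prompt used in the guides.
schema = {
    "name": "language_rankings",
    "schema": {
        "type": "object",
        "properties": {
            "rankings": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "rank": {"type": "integer"},
                        "language": {"type": "string"},
                    },
                    "required": ["rank", "language"],
                },
            }
        },
        "required": ["rankings"],
    },
}

response = client.chat.completions.create(
    # The model field is now mandatory: the shared endpoint routes on it,
    # where the old per-model hosts inferred the model from the URL.
    model="Meta-Llama-3_3-70B-Instruct",
    messages=[
        {"role": "system", "content": "You always answer in JSON format."},
        {"role": "user", "content": "What are the top 3 most popular programming languages?"},
    ],
    response_format={"type": "json_schema", "json_schema": schema},
)
print(json.loads(response.choices[0].message.content))
```

The behavioural point to verify when testing these hunks is exactly that routing change: the request succeeds only when `model` is present in the body.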
index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md 
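The same consolidation applies to the legacy JSON mode touched in the hunks above. As a quick cross-check, here is a hedged sketch under the same assumptions (shared endpoint, mandatory `model` field); note that `json_object` only guarantees syntactically valid JSON, so the system prompt still has to ask for JSON explicitly.

```python
# Sketch only: legacy JSON mode (json_object) on the shared endpoint.
# Unlike json_schema, no schema is enforced, so prompt for JSON explicitly.
import json
import os

import openai

client = openai.OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["AI_ENDPOINT_API_KEY"],
)

response = client.chat.completions.create(
    model="Meta-Llama-3_3-70B-Instruct",  # required on the shared endpoint
    messages=[
        {"role": "system", "content": "You always answer in JSON format."},
        {"role": "user", "content": "What are the top 3 most popular programming languages?"},
    ],
    response_format={"type": "json_object"},
)
print(json.loads(response.choices[0].message.content))
```

If schema enforcement is needed rather than best-effort JSON, the `json_schema` variant sketched earlier is the one to test.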
index a49a160a73c..b6292f25131 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Structured Output excerpt: Learn how to use Structured Output with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: 
application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + 
"description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md index 429e28589a9..4aec5dd33b2 100644 --- 
a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the 
current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md index 544d261dc42..9716cc4ac66 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + 
"stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get 
tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with 
cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 
'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ 
+ "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md index 217fd3f89bb..e69413f2347 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Appel de fonctions excerpt: Découvrez comment utiliser l'appel de fonctions avec OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md index 217fd3f89bb..e69413f2347 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Appel de fonctions excerpt: Découvrez comment utiliser l'appel de fonctions avec OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": 
null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + 
"function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md index 429e28589a9..4aec5dd33b2 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Function Calling excerpt: Learn how to use Function Calling with OVHcloud AI Endpoints -updated: 2025-08-06 +updated: 2025-12-19 --- > [!primary] @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" 
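
The hunks above all converge on the same consolidated pattern: one OpenAI-compatible client, pointed at the shared `https://oai.endpoints.kepler.ai.cloud.ovh.net/v1` base URL, now serves both the Whisper transcription call and the Mixtral summarization call. As a sanity check, here is a minimal standalone sketch of that pattern; it assumes the `.env` variables defined in the guide, and `meeting.wav` is a hypothetical local file used purely for illustration:

```python
# Minimal sketch of the unified-client pattern: one OpenAI-compatible client
# serves both transcription (ASR) and summarization (LLM).
# Assumes OVH_AI_ENDPOINTS_URL and OVH_AI_ENDPOINTS_ACCESS_TOKEN are set in .env;
# "meeting.wav" is a hypothetical input file.
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()

oai_client = OpenAI(
    base_url=os.getenv("OVH_AI_ENDPOINTS_URL"),
    api_key=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

# Transcribe with the ASR model exposed on the shared endpoint
with open("meeting.wav", "rb") as audio_file:
    transcript = oai_client.audio.transcriptions.create(
        model="whisper-large-v3",
        file=audio_file,
    ).text

# Summarize the transcript with the LLM on the same endpoint
summary = oai_client.chat.completions.create(
    model="Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": f"Summarize the following text in a few words: {transcript}"}],
    temperature=0,
)
print(summary.choices[0].message.content)
```

Only the `model` parameter now selects between ASR and LLM; the base URL and the access token are shared across both calls.
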
@@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use 
`Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + 
response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 
@@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech 
Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - 
-llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and 
authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = 
os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, 
load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" 
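
Because the same `oai_client` is bound into both callbacks, the `functools.partial` wiring shown in these hunks keeps the Gradio event handlers' signatures limited to UI inputs. A minimal sketch of that wiring, with illustrative component names, assuming the `asr_transcription` and `chat_completion` functions defined above (and that `chat_completion` returns the summary text):

```python
import functools

import gradio as gr

# Bind the shared client so Gradio only passes UI values to the callbacks
asr_transcribe_fn = functools.partial(asr_transcription, oai_client)
chat_completion_fn = functools.partial(chat_completion, oai_client)

with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo:
    audio_input = gr.Audio(type="filepath", label="Audio to summarize")  # illustrative name
    transcript = gr.Textbox(label="Transcription")
    summary = gr.Textbox(label="Summary")
    clear = gr.ClearButton([audio_input, transcript, summary])  # resets the page

    # Chain the two endpoints: audio -> transcript -> summary
    audio_input.change(asr_transcribe_fn, inputs=audio_input, outputs=transcript)
    transcript.change(chat_completion_fn, inputs=transcript, outputs=summary)

demo.launch()
```
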
@@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md index b1df5f59661..b5f4a6d5dbf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Transcrire et résumer des fichiers audio (EN) excerpt: "Résumer des heures de réunions et conversations audio avec des APIs d'ASR et de LLM" -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the 
`chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md index b1df5f59661..b5f4a6d5dbf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Transcrire et résumer des fichiers audio (EN) excerpt: "Résumer des heures de réunions et conversations audio avec des APIs d'ASR et de LLM" -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, 
"rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def 
asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) 
``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md index 9351da605b4..b8b091f7e3a 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own audio summarizer excerpt: Summarize hours of meetings ASR and LLM AI endpoints -updated: 2025-10-24 +updated: 2025-12-19 --- > [!primary] @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client 
= OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md index 3612f61c772..075d9458f75 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own voice assistant excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code -updated: 2025-10-01 +updated: 2025-12-19 --- > [!primary] @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md
index 91e268d8fec..d563c2a68f9 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
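For reference, here is a minimal standalone sketch of the pattern these hunks introduce: one `oai_client` serving both transcription and chat through the unified base URL. It is only a sketch, assuming the `openai` and `python-dotenv` packages are installed and the `.env` file above is filled in; the `question.wav` file name is purely illustrative and not part of this patch.

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

# Read OVH_AI_ENDPOINTS_URL and OVH_AI_ENDPOINTS_ACCESS_TOKEN from .env
load_dotenv()

# A single client now covers every OpenAI-compatible AI Endpoints model
oai_client = OpenAI(
    base_url=os.environ.get("OVH_AI_ENDPOINTS_URL"),
    api_key=os.environ.get("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

# Transcribe a local audio file (hypothetical file name)
with open("question.wav", "rb") as audio_file:
    transcript = oai_client.audio.transcriptions.create(
        model="whisper-large-v3",
        file=audio_file,
    ).text

# Feed the transcription to the LLM served behind the same base URL
answer = oai_client.chat.completions.create(
    model="Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": transcript}],
    temperature=0,
).choices[0].message.content

print(answer)
```

If this runs end to end, it confirms that the single client replaces the former `asr_client`/`llm_client` pair, with the target model selected per request via the `model` parameter rather than per URL.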
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md
index 439e53f413b..71e6977b321 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Développer son assistant audio (EN)
 excerpt: "Créez un chatbot vocal en utilisant des APIs d'ASR, de LLM et de TTS en moins de 100 lignes de code"
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md
index 439e53f413b..71e6977b321 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Développer son assistant audio (EN)
 excerpt: "Créez un chatbot vocal en utilisant des APIs d'ASR, de LLM et de TTS en moins de 100 lignes de code"
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
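Because every model now sits behind the same base URL, the client defined above can also be used to discover what is available. The following is a minimal sketch, assuming the unified endpoint exposes the standard OpenAI-compatible `/v1/models` route (that route is not shown in this patch) and that the `.env` file above is filled in:

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

# Read OVH_AI_ENDPOINTS_URL and OVH_AI_ENDPOINTS_ACCESS_TOKEN from .env
load_dotenv()

oai_client = OpenAI(
    base_url=os.environ.get("OVH_AI_ENDPOINTS_URL"),
    api_key=os.environ.get("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

# List every model served behind the unified endpoint; this assumes
# the endpoint implements the standard /v1/models listing.
for model in oai_client.models.list():
    print(model.id)
```

If the route is available, each printed `id` is a valid value for the `model` parameter used throughout these guides, such as `whisper-large-v3` or `Mixtral-8x7B-Instruct-v0.1`.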
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md
index 3612f61c772..075d9458f75 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own voice assistant
 excerpt: Create a voice-enabled chatbot using ASR, LLM, and TTS endpoints in under 100 lines of code
-updated: 2025-10-01
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing
 In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables:
 
 ```bash
-ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443
-LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
 
@@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi
 # access the environment variables from the .env file
 load_dotenv()
 
-ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT')
 TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT')
-LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT')
+OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 ```
 
- Next, define the clients that will be used to interact with the models:
+Next, define the clients that will be used to interact with the models:
 
 ```python
-llm_client = OpenAI(
-    base_url=LLM_AI_ENDPOINT,
+oai_client = OpenAI(
+    base_url=OVH_AI_ENDPOINTS_URL,
     api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
 )
 
@@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService(
         metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]]
     )
 )
-
-asr_client = OpenAI(
-    base_url=ASR_AI_ENDPOINT,
-    api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN
-)
 ```
 
 💡 You are now ready to start coding your web app!
@@ -127,8 +120,8 @@ asr_client = OpenAI(
 First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text:
 
 ```python
-def asr_transcription(question, asr_client):
-    return asr_client.audio.transcriptions.create(
+def asr_transcription(question, oai_client):
+    return oai_client.audio.transcriptions.create(
         model="whisper-large-v3",
         file=question
     ).text
@@ -147,8 +140,8 @@ def asr_transcription(question, asr_client):
 Now, create a function that calls the LLM client to provide responses to questions:
 
 ```python
-def llm_answer(input, llm_client):
-    response = llm_client.chat.completions.create(
+def llm_answer(input, oai_client):
+    response = oai_client.chat.completions.create(
         model="Mixtral-8x7B-Instruct-v0.1",
         messages=input,
         temperature=0,
@@ -228,12 +221,12 @@ with st.container():
     )
 
     if recording:
-        user_question = asr_transcription(recording['bytes'], asr_client)
+        user_question = asr_transcription(recording['bytes'], oai_client)
 
     if prompt := user_question:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"})
         messages.chat_message("user", avatar="👤").write(prompt)
-        msg = llm_answer(st.session_state.messages, llm_client)
+        msg = llm_answer(st.session_state.messages, oai_client)
         st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"})
         messages.chat_message("system", avatar="🤖").write(msg)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md
index 633711d1480..bab0659625f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a code assistant with Continue
assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md index 1b78d52a568..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-10-29 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 
apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md index 1b78d52a568..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-10-29 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ 
-51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md index 9331eae8541..65e87f139bf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Créer un assistant de code avec Continue (EN) excerpt: "Créez votre assistant de code directement dans votre IDE (VSCode ou JetBrains) à l'aide du plug-in Continue" -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md index 9331eae8541..65e87f139bf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Créer un assistant de code avec Continue (EN) excerpt: "Créez votre assistant de code directement dans votre IDE (VSCode ou JetBrains) à l'aide du plug-in Continue" -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: 
Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md index 633711d1480..bab0659625f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a code assistant with Continue excerpt: Build your own code assistant directly in VSCode or JetBrains IDEs using the Continue plugin -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
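Since every guide in this changeset now points at the same base URL, a quick sanity check of the switch is to send one chat completion through the unified route with an explicit `model` field. A minimal sketch, assuming the `openai` package is installed and `OVH_AI_ENDPOINTS_ACCESS_TOKEN` is exported as in the guides above; the model name is one of those referenced in this diff:

```python
# Minimal sketch: one OpenAI-compatible client for the unified AI Endpoints URL.
# Assumes the `openai` package is installed and OVH_AI_ENDPOINTS_ACCESS_TOKEN is exported.
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

# The model is now selected per request rather than per base URL, so the same
# client can target Mistral-7B-Instruct-v0.3, Meta-Llama-3_3-70B-Instruct, etc.
response = client.chat.completions.create(
    model="Mistral-7B-Instruct-v0.3",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=50,
)
print(response.choices[0].message.content)
```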
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
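The LangChain hunks above only swap the `endpoint` argument, so the setup can be smoke-tested end to end in a few lines. A minimal sketch, assuming the `.env` file from this guide exists and `langchain-mistralai` and `python-dotenv` are installed:

```python
# Smoke test for the ChatMistralAI setup patched above.
# Assumes the guide's .env file is present in the working directory.
import os
from dotenv import load_dotenv
from langchain_mistralai import ChatMistralAI

load_dotenv()

model = ChatMistralAI(
    model=os.environ.get("OVH_AI_ENDPOINTS_MODEL_NAME"),
    api_key=os.environ.get("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
    endpoint=os.environ.get("OVH_AI_ENDPOINTS_URL"),
    max_tokens=100,
)

# invoke() sends a single prompt and returns an AIMessage.
print(model.invoke("What is OVHcloud in one sentence?").content)
```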
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
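The second hunk in each LangChain guide sets `streaming=True`; the same unified endpoint serves that path too. A short sketch of the streaming variant, under the same assumptions as the previous one (`.env` present, `langchain-mistralai` installed):

```python
# Streaming variant, mirroring the streaming=True hunk above.
import os
from dotenv import load_dotenv
from langchain_mistralai import ChatMistralAI

load_dotenv()

model = ChatMistralAI(
    model=os.environ.get("OVH_AI_ENDPOINTS_MODEL_NAME"),
    api_key=os.environ.get("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
    endpoint=os.environ.get("OVH_AI_ENDPOINTS_URL"),
    max_tokens=1500,
    streaming=True,
)

# stream() yields message chunks as the model generates them.
for chunk in model.stream("Explain what AI Endpoints are, briefly."):
    print(chunk.content, end="", flush=True)
print()
```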
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
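Because `OVH_AI_ENDPOINTS_MODEL_NAME` is now the only model-specific value left in these guides, it can help to list what the unified endpoint actually serves before filling it in. A sketch under the assumption that the endpoint exposes the standard OpenAI-compatible `/v1/models` route (not confirmed by this diff):

```python
# Optional check: list the models served by the unified endpoint, to pick a
# valid OVH_AI_ENDPOINTS_MODEL_NAME. Assumes /v1/models is available here.
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

for m in client.models.list():
    print(m.id)
```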
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md index c666ab460c6..ce4e7ae6fd4 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot en Python avec LangChain (EN) excerpt: Apprenez à développer un chatbot en Python en utilisant LangChain et AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md index c666ab460c6..ce4e7ae6fd4 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot en Python avec LangChain (EN) excerpt: Apprenez à développer un chatbot en Python en utilisant LangChain et AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
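The streaming variant changed in the same hunks differs only in the model construction (`streaming=True`) and in how the answer is consumed. A sketch reusing the imports and variables from the previous example; iterating over `chain.stream(...)` is one straightforward way to surface tokens as they arrive, and is an assumption here rather than the guide's exact consumption code:

```python
def chat_completion_streaming(new_message: str) -> None:
    # streaming=True asks the endpoint for incremental chunks
    model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
                          api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
                          endpoint=_OVH_AI_ENDPOINTS_URL,
                          max_tokens=1500,
                          streaming=True)

    prompt = ChatPromptTemplate.from_messages([
        ('system', 'You are a helpful assistant.'),  # placeholder system prompt
        ('human', '{question}'),
    ])

    chain = prompt | model
    # Print each chunk as soon as it is received instead of waiting for the full answer
    for chunk in chain.stream({'question': new_message}):
        print(chunk.content, end='', flush=True)
```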
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md index 20e9ddd9abc..6eae757fede 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a Python Chatbot with LangChain excerpt: Learn how to build a chatbot in Python using LangChain and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
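Because `OVH_AI_ENDPOINTS_MODEL_URL` disappears in this change, a stale `.env` file fails silently: `os.environ.get` simply returns `None` for the new name. A small, hypothetical fail-fast check (not part of the guides) makes the migration obvious:

```python
import os
import sys

from dotenv import load_dotenv

load_dotenv()

# Variable names after this change; OVH_AI_ENDPOINTS_MODEL_URL no longer exists
REQUIRED_VARS = (
    'OVH_AI_ENDPOINTS_ACCESS_TOKEN',
    'OVH_AI_ENDPOINTS_MODEL_NAME',
    'OVH_AI_ENDPOINTS_URL',
)

missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
if missing:
    sys.exit(f"Missing environment variables: {', '.join(missing)} "
             "(note: OVH_AI_ENDPOINTS_MODEL_URL was replaced by OVH_AI_ENDPOINTS_URL)")
```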
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. @@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md index 38a10f4a819..43f5239d137 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot en JavaScript avec LangChain (EN) excerpt: Apprenez à déveleopper un chatbot en utilisant JavaScript, LangChain et AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md index 38a10f4a819..43f5239d137 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot en JavaScript avec LangChain (EN) excerpt: Apprenez à déveleopper un chatbot en utilisant JavaScript, LangChain et AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md index e7c604c17ab..31482c83b45 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a JavaScript Chatbot with LangChain excerpt: Learn how to build a chatbot using JavaScript, LangChain, and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md index e133eeec674..a1f3f9bc8c4 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
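Since every guide now targets the single OpenAI-compatible base URL, the endpoint can also be exercised directly with any OpenAI-style client, independent of LangChain, LangChain.js, or LangChain4j. A minimal sketch with the `openai` Python package; the model name must match the value configured in your environment:

```python
import os

import openai

# One shared URL for all models; the target model is picked per request
client = openai.OpenAI(
    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
    api_key=os.environ['OVH_AI_ENDPOINTS_ACCESS_TOKEN'],
)

response = client.chat.completions.create(
    model='Mistral-7B-Instruct-v0.3',  # or any other model you want to use
    messages=[{'role': 'user', 'content': 'Hello!'}],
    max_tokens=128,
)
print(response.choices[0].message.content)
```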
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md
index a6a66754469..f06ea502674 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Créez votre propre chatbot IA à l'aide de LangChain4j et Quarkus (EN)
 excerpt: Découvrez comment construire un chatbot en utilisant LangChain4j, Quarkus et OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md
index a6a66754469..f06ea502674 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Créez votre propre chatbot IA à l'aide de LangChain4j et Quarkus (EN)
 excerpt: Découvrez comment construire un chatbot en utilisant LangChain4j, Quarkus et OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md
index e133eeec674..a1f3f9bc8c4 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create your own AI chatbot using LangChain4j and Quarkus
 excerpt: Learn how to build an AI-powered chatbot using LangChain4j, Quarkus, and OVHcloud AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md
index 9157c20ef7a..fbe11e9b982 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Créer un chatbot de streaming avec LangChain4j et Quarkus (EN)
 excerpt: Découvrez comment créer un chatbot basé sur Java qui diffuse les réponses de AI Endpoints pour offrir une expérience de chat en temps réel
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md
index 9157c20ef7a..fbe11e9b982 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Créer un chatbot de streaming avec LangChain4j et Quarkus (EN)
 excerpt: Découvrez comment créer un chatbot basé sur Java qui diffuse les réponses de AI Endpoints pour offrir une expérience de chat en temps réel
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md
index 3267ea6f35e..0c769539be6 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Streaming Chatbot with LangChain4j and Quarkus
 excerpt: How to build a Java-based chatbot that streams responses from AI Endpoints for a real-time chat experience
-updated: 2025-04-28
+updated: 2025-12-19
 ---

 > [!primary]
@@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena
 ```console
 ### Global configurations
 # Base URL for Mistral AI endpoints
-quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL}
+quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL}
 # Activate or not the log during the request
 quarkus.langchain4j.mistralai.log-requests=true
 # Activate or not the log during the response
@@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024
 You will also have to replace two other environment variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model.
 For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be:
 - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3
-- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1

 ### Build a REST API to interact with the chatbot

diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md
index a7f3fdf47e9..0f122d92aa5 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Enable conversational memory in your chatbot using LangChain
 excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules
-updated: 2025-07-31
+updated: 2025-12-19
 ---

 > [!primary]
@@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati
 In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables:

 ```bash
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```

@@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi
 load_dotenv()

 ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
-ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT")
+ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")
 ```

 💡 You are now ready to test your LLM without conversational memory!
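Before the remaining hunks of this guide, a reading aid: the fragments above only show what the patch changes, not the full setup they belong to. Below is a minimal, self-contained sketch of the patched setup, assuming `python-dotenv` and `langchain-openai` are installed and the `.env` file from the hunk above is in place; the smoke-test prompt at the end is illustrative, not from the tutorial:

```python
import os

from dotenv import load_dotenv            # pip install python-dotenv
from langchain_openai import ChatOpenAI   # pip install langchain-openai

# Read LLM_AI_ENDPOINT (now the unified https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
# gateway) and OVH_AI_ENDPOINTS_ACCESS_TOKEN from the .env file shown above.
load_dotenv()

ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")

# With the unified gateway, the URL no longer identifies the model,
# so model_name must name the target model explicitly.
llm = ChatOpenAI(
    model_name="Mistral-7B-Instruct-v0.3",
    openai_api_key=ai_endpoint_token,
    openai_api_base=ai_endpoint_url,
    max_tokens=512,
    temperature=0.0,
)

print(llm.invoke("Hello!").content)  # illustrative smoke test
```

This is the whole thrust of the renaming above: only the variable name (`ai_endpoint_url`) and the value of `LLM_AI_ENDPOINT` change; the `ChatOpenAI` parameters themselves are untouched.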
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
@@ -179,7 +179,7 @@ which gives the following code:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md
index a7f3fdf47e9..0f122d92aa5 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Enable conversational memory in your chatbot using LangChain
 excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules
-updated: 2025-07-31
+updated: 2025-12-19
 ---

 > [!primary]
@@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati
 In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables:

 ```bash
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```

@@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi
 load_dotenv()

 ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
-ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT")
+ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")
 ```

 💡 You are now ready to test your LLM without conversational memory!
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
@@ -179,7 +179,7 @@ which gives the following code:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md
index a7f3fdf47e9..0f122d92aa5 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Enable conversational memory in your chatbot using LangChain
 excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules
-updated: 2025-07-31
+updated: 2025-12-19
 ---

 > [!primary]
@@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati
 In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables:

 ```bash
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```

@@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi
 load_dotenv()

 ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
-ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT")
+ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")
 ```

 💡 You are now ready to test your LLM without conversational memory!
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
@@ -179,7 +179,7 @@ which gives the following code:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md
index a7f3fdf47e9..0f122d92aa5 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Enable conversational memory in your chatbot using LangChain
 excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules
-updated: 2025-07-31
+updated: 2025-12-19
 ---

 > [!primary]
@@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati
 In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables:

 ```bash
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```

@@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi
 load_dotenv()

 ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
-ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT")
+ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")
 ```

 💡 You are now ready to test your LLM without conversational memory!
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
@@ -179,7 +179,7 @@ which gives the following code:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md
index a7f3fdf47e9..0f122d92aa5 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Enable conversational memory in your chatbot using LangChain
 excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules
-updated: 2025-07-31
+updated: 2025-12-19
 ---

 > [!primary]
@@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati
 In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables:

 ```bash
-LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```

@@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi
 load_dotenv()

 ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN")
-ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT")
+ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT")
 ```

 💡 You are now ready to test your LLM without conversational memory!
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md index 3aa9a227053..0efa3e1a5cf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Activer la mémoire de conversation dans votre chatbot avec LangChain (EN) excerpt: Intégrez la mémoire de conversation à votre chatbot en utilisant AI Endpoints et LangChain -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md index 3aa9a227053..0efa3e1a5cf 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Activer la mémoire de conversation dans votre chatbot avec LangChain (EN) excerpt: Intégrez la mémoire de conversation à votre chatbot en utilisant AI Endpoints et LangChain -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md index a7f3fdf47e9..0f122d92aa5 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Enable conversational memory in your chatbot using LangChain excerpt: How to integrate conversational memory into your chatbot using AI Endpoints and LangChain’s memory modules -updated: 2025-07-31 +updated: 2025-12-19 --- > [!primary] @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
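One consequence of the shared endpoint is worth spelling out: the per-model host is gone, so nothing in the URL identifies the model any more and it must be named explicitly in every request. The snippet below is a hypothetical smoke test using the plain `openai` Python SDK; the SDK choice, prompt and token limit are assumptions, while the URL, token variable and model name come from the hunks above.

```python
# Hypothetical smoke test for the unified endpoint, not part of the patched guides.
import os

import openai

client = openai.OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

# On a shared base URL, the target model is selected per request.
response = client.chat.completions.create(
    model="Mistral-7B-Instruct-v0.3",
    messages=[{"role": "user", "content": "Reply with the single word: pong"}],
    max_tokens=8,
)
print(response.choices[0].message.content)
```

The same requirement explains why the LangChain4j guides below keep `OVH_AI_ENDPOINTS_MODEL_NAME` as a separate variable next to the now-shared endpoint URL.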
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
@@ -179,7 +179,7 @@ which gives the following code:
 llm = ChatOpenAI(
     model_name="Mistral-7B-Instruct-v0.3",
     openai_api_key=ai_endpoint_token,
-    openai_api_base=ai_endpoint_mistral7b,
+    openai_api_base=ai_endpoint_url,
     max_tokens=512,
     temperature=0.0
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md
index 5e82baac280..b602445a201 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Memory Chatbot with LangChain4j
 excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -133,7 +133,7 @@ Make sure your environment variables are set:
 
 ```bash
 export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 export OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md
index 5e82baac280..b602445a201 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Memory Chatbot with LangChain4j
 excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -133,7 +133,7 @@ Make sure your environment variables are set:
 
 ```bash
 export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 export OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md
index 5e82baac280..b602445a201 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Create a Memory Chatbot with LangChain4j
 excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints
-updated: 2025-04-28
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -133,7 +133,7 @@ Make sure your environment variables are set:
 
 ```bash
 export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md index 5e82baac280..b602445a201 100644 --- 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md index e1aee059416..5c73ef24c9c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot avec mémoire en utilisant LangChain4j (EN) excerpt: "Apprenez à mettre en œuvre la mémoire dans un chatbot de discussion Java à l'aide de LangChain4j" -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md index e1aee059416..5c73ef24c9c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot avec mémoire en utilisant LangChain4j (EN) excerpt: "Apprenez à mettre en œuvre la mémoire dans un chatbot de discussion Java à l'aide de LangChain4j" -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md @@ -1,7 +1,7 @@ --- title: 
AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md index 5e82baac280..b602445a201 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Create a Memory Chatbot with LangChain4j excerpt: Learn how to implement memory in a Java-based chatbot using LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store 
environm
 
 ```bash
 OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
@@ -96,7 +96,7 @@ load_dotenv()
 
 ## Retrieve the OVHcloud AI Endpoints configurations
 _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME')
-_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL')
+_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME')
 ```
@@ -110,7 +110,7 @@ def chat_completion(new_message: str):
     # no need to use a token
     model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
                           api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
-                          endpoint=_OVH_AI_ENDPOINTS_MODEL_URL,
+                          endpoint=_OVH_AI_ENDPOINTS_URL,
                           max_tokens=1500,
                           streaming=True)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md
index 797664ced93..c26a881cc22 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Build a RAG Chatbot with LangChain
 excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints
-updated: 2025-10-30
+updated: 2025-12-19
 ---
 
 > [!primary]
@@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm
 
 ```bash
 OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
@@ -96,7 +96,7 @@ load_dotenv()
 
 ## Retrieve the OVHcloud AI Endpoints configurations
 _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN')
 _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME')
-_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL')
+_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL')
 _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME')
 ```
@@ -110,7 +110,7 @@ def chat_completion(new_message: str):
     # no need to use a token
     model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME,
                           api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN,
-                          endpoint=_OVH_AI_ENDPOINTS_MODEL_URL,
+                          endpoint=_OVH_AI_ENDPOINTS_URL,
                           max_tokens=1500,
                           streaming=True)
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md
index 797664ced93..c26a881cc22 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md
@@ -1,7 +1,7 @@
 ---
 title: AI Endpoints - Build a RAG Chatbot with LangChain
 excerpt: Learn how to
build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md index 797664ced93..c26a881cc22 100644 --- 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need 
to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') 
+_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md index 0ea2b4879d2..35dc69b405d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot de discussion RAG avec LangChain (EN) excerpt: Apprenez à construire un chatbot RAG (Retrieval Augmented Generation) en utilisant Python et LangChain -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md index 0ea2b4879d2..35dc69b405d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot de discussion RAG avec LangChain (EN) excerpt: Apprenez à construire un chatbot RAG (Retrieval Augmented Generation) en utilisant Python et LangChain -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve 
the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md index 797664ced93..c26a881cc22 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Python, LangChain and AI Endpoints -updated: 2025-10-30 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, 
LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public 
class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static 
final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ 
public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class 
RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String 
OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class 
RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md index 7afd33f1297..a2e533ebdca 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot de discussion RAG avec LangChain4j (EN) excerpt: Apprenez à construire un chatbot RAG (Retrieval Augmented Generation) en utilisant Java et LangChain4j -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md index 7afd33f1297..a2e533ebdca 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Développer un chatbot de discussion RAG avec LangChain4j (EN) excerpt: Apprenez à construire un chatbot RAG (Retrieval Augmented Generation) en utilisant Java et LangChain4j -updated: 
2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); 
private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md index 30474a76213..2369d121492 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md @@ -1,7 +1,7 @@ --- title: AI Endpoints - Build a RAG Chatbot with LangChain4j excerpt: Learn how to build a RAG (Retrieval Augmented Generation) chatbot using Java, LangChain4j and AI Endpoints -updated: 2025-04-28 +updated: 2025-12-19 --- > [!primary] @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String 
OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build();