|
46 | 46 | from huggingface_hub.inference._providers.novita import NovitaConversationalTask, NovitaTextGenerationTask |
47 | 47 | from huggingface_hub.inference._providers.nscale import NscaleConversationalTask, NscaleTextToImageTask |
48 | 48 | from huggingface_hub.inference._providers.openai import OpenAIConversationalTask |
| 49 | +from huggingface_hub.inference._providers.ovhcloud import OVHcloudConversationalTask |
49 | 50 | from huggingface_hub.inference._providers.publicai import PublicAIConversationalTask |
50 | 51 | from huggingface_hub.inference._providers.replicate import ( |
51 | 52 | ReplicateAutomaticSpeechRecognitionTask, |
@@ -1423,6 +1424,57 @@ def test_prepare_url(self): |
1423 | 1424 | assert helper._prepare_url("sk-XXXXXX", "gpt-4o-mini") == "https://api.openai.com/v1/chat/completions" |
1424 | 1425 |
|
1425 | 1426 |
|
class TestOVHcloudAIEndpointsProvider:
    """Tests for the OVHcloud AI Endpoints conversational provider helper."""

    def test_prepare_hf_url_conversational(self):
        # An `hf_`-prefixed token should be routed through the Hugging Face router proxy.
        task = OVHcloudConversationalTask()
        result = task._prepare_url("hf_token", "username/repo_name")
        assert result == "https://router.huggingface.co/ovhcloud/v1/chat/completions"

    def test_prepare_url_conversational(self):
        # A provider-issued key should target the OVHcloud endpoint directly.
        task = OVHcloudConversationalTask()
        result = task._prepare_url("ovhcloud_token", "username/repo_name")
        assert result == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions"

    def test_prepare_payload_as_dict(self):
        # The payload merges chat messages with sampling parameters and uses the
        # provider-side model id (not the Hub repo id) as "model".
        task = OVHcloudConversationalTask()
        messages = [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hello!"},
        ]
        parameters = {
            "max_tokens": 512,
            "temperature": 0.15,
            "top_p": 1,
            "presence_penalty": 0,
            "stream": True,
        }
        mapping = InferenceProviderMapping(
            provider="ovhcloud",
            hf_model_id="meta-llama/Llama-3.1-8B-Instruct",
            providerId="Llama-3.1-8B-Instruct",
            task="conversational",
            status="live",
        )
        prepared = task._prepare_payload_as_dict(messages, parameters, mapping)
        expected = {
            "max_tokens": 512,
            "messages": [
                {"content": "You are a helpful assistant", "role": "system"},
                {"role": "user", "content": "Hello!"},
            ],
            "model": "Llama-3.1-8B-Instruct",
            "presence_penalty": 0,
            "stream": True,
            "temperature": 0.15,
            "top_p": 1,
        }
        assert prepared == expected

    def test_prepare_route_conversational(self):
        # Conversational requests always hit the OpenAI-compatible chat route.
        task = OVHcloudConversationalTask()
        assert task._prepare_route("username/repo_name", "hf_token") == "/v1/chat/completions"
| 1477 | + |
1426 | 1478 | class TestReplicateProvider: |
1427 | 1479 | def test_automatic_speech_recognition_payload(self): |
1428 | 1480 | helper = ReplicateAutomaticSpeechRecognitionTask() |
|
0 commit comments